So, I have this dependency which is used to create tables and interact with Postgres. Here is a sample class:

import org.postgresql.util.PSQLException

import scala.util.{Failure, Success, Try}

class ConfigTable {

  this: DBFactory =>

  import driver.api._

  implicit val configKeyMapper = MappedColumnType.base[ConfigKey, String](e => e.toString, s => ConfigKey.withName(s))

  val configs = TableQuery[ConfigMapping]

  class ConfigMapping(tag: Tag) extends Table[Config](tag, "configs") {

    def key = column[ConfigKey]("key")
    def value = column[String]("value")
    def * = (key, value) <> (Config.tupled, Config.unapply _)
  }

  /**
    * Adds a config row.
    *
    * @param config the config to insert
    * @return the inserted config, or a failure if the insert did not affect exactly one row
    */
  def add(config: Config): Try[Config] = try {
    sync(db.run(configs += config)) match {
      case 1 => Success(config)
      case _ => Failure(new Exception("Unable to add config"))
    }
  } catch {
    case ex: PSQLException =>
      if (ex.getMessage.contains("duplicate key value")) Failure(new Exception("alt id already exists."))
      else Failure(new Exception(ex.getMessage))
  }

  def get(key: ConfigKey): Option[Config] = sync(db.run(configs.filter(x => x.key === key).result)).headOption

  def getAll(): Seq[Config] = sync(db.run(configs.result))

}

object ConfigTable extends ConfigTable with PSQLComponent
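
For reference, this is roughly how the published object is consumed (ConfigKey.SomeKey below is a hypothetical enumeration value, used only for illustration):

import scala.util.{Failure, Success}

// Hypothetical usage of the published ConfigTable object.
ConfigTable.add(Config(ConfigKey.SomeKey, "some-value")) match {
  case Success(saved) => println(s"stored $saved")
  case Failure(error) => println(error.getMessage)
}

val maybeConfig: Option[Config] = ConfigTable.get(ConfigKey.SomeKey)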

PSQLComponent is an abstraction over the database meta-configuration:

import slick.jdbc.PostgresProfile

trait PSQLComponent extends DBFactory {

  val driver = PostgresProfile

  import driver.api.Database

  val db: Database = Database.forConfig("db.default")
}

DBFactory is, again, an abstraction:

import slick.jdbc.JdbcProfile

trait DBFactory {

  val driver: JdbcProfile

  import driver.api._

  val db: Database

}
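
ConfigTable also relies on a sync(...) helper that is not shown above; presumably it just blocks on the Future returned by db.run. A minimal sketch of what it is assumed to look like (the 10-second timeout is an assumption):

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

// Assumed helper, not shown in the snippets above: blocks until the
// database Future completes and returns its value.
def sync[T](future: Future[T]): T = Await.result(future, 10.seconds)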

application.conf

db.default {
  driver = "org.postgresql.Driver"
  url = "jdbc:postgresql://localhost:5432/db"
  user = "user"
  password = "pass"
  hikaricp {
    minimumIdle = ${db.default.async-executor.minConnections}
    maximumPoolSize = ${db.default.async-executor.maxConnections}
  }
}

jdbc-defaults.slick.profile = "slick.jdbc.PostgresProfile$"
lagom.persistence.jdbc.create-tables.auto=false
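
Note that the hikaricp substitutions above reference db.default.async-executor.*, which is not part of the snippet; an async-executor block along these lines is assumed to be defined as well (the values are only illustrative):

db.default.async-executor {
  numThreads = 20
  minConnections = 20
  maxConnections = 20
}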

I compile this dependency, publish it to Nexus, and try to use it in my Lagom microservice.

Here is the loader class:

import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.devmode.LagomDevModeComponents
import com.lightbend.lagom.scaladsl.server._
import com.softwaremill.macwire._
import play.api.libs.ws.ahc.AhcWSComponents

class SlickExapleAppLoader extends LagomApplicationLoader {

  override def load(context: LagomApplicationContext): LagomApplication = new SlickExampleApp(context) {
    override def serviceLocator: ServiceLocator = NoServiceLocator
  }

  override def loadDevMode(context: LagomApplicationContext): LagomApplication = new SlickExampleApp(context) with LagomDevModeComponents {

  }

  override def describeService = Some(readDescriptor[SlickExampleLMSServiceImpl])
}

abstract class SlickExampleApp(context: LagomApplicationContext)
  extends LagomApplication(context)
    // No idea which of these to mix in, or how; the docs are not clear about this either.
    //    with ReadSideJdbcPersistenceComponents
    //    with ReadSideSlickPersistenceComponents
    //    with SlickPersistenceComponents
    with AhcWSComponents {


  // Eagerly instantiate the scheduler when the application starts.
  val slickExampleScheduler: SlickExampleScheduler = wire[SlickExampleScheduler]

}

I am trying to use it in this scheduler:

import javax.inject.Inject

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.google.gson.Gson
import play.api.Configuration

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class SlickExampleScheduler @Inject()(lmsService: LMSService,
                                      configuration: Configuration)(implicit ec: ExecutionContext) {
  val brofile = `SomeDomainObject`
  val gson = new Gson()
  val concurrency = Runtime.getRuntime.availableProcessors() * 10

  implicit val timeout: Timeout = 3.minute
  implicit val system: ActorSystem = ActorSystem("LMSActorSystem")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // ExceptionInInitializerError is thrown here, the first time the ConfigTable
  // object is referenced (and therefore initialized).
  val schedulerImplDao = new SchedulerImplDao(ConfigTable)

  def hitLMSAPI = {

    println("=============>1")

    schedulerImplDao.doSomething()
  }

  system.scheduler.schedule(2.seconds, 2.seconds) {
    println("=============>")
    hitLMSAPI
  }

}

I am not sure whether this is the right approach or not. Keeping the data models separate from the service is a project requirement, for obvious reusability reasons.

Exception stack trace:

17:50:38.666 [info] akka.cluster.Cluster(akka://lms-impl-application) [sourceThread=ForkJoinPool-1-worker-1, akkaTimestamp=12:20:38.665UTC, akkaSource=akka.cluster.Cluster(akka://lms-impl-application), sourceActorSystem=lms-impl-application] - Cluster Node [akka.tcp://lms-impl-application@127.0.0.1:45805] - Started up successfully
17:50:38.707 [info] akka.cluster.Cluster(akka://lms-impl-application) [sourceThread=lms-impl-application-akka.actor.default-dispatcher-6, akkaTimestamp=12:20:38.707UTC, akkaSource=akka.cluster.Cluster(akka://lms-impl-application), sourceActorSystem=lms-impl-application] - Cluster Node [akka.tcp://lms-impl-application@127.0.0.1:45805] - No seed-nodes configured, manual cluster join required
java.lang.ExceptionInInitializerError
    at com.slick.init.impl.SlickExampleScheduler.<init>(SlickExampleScheduler.scala:29)
    at com.slick.init.impl.SlickExampleApp.<init>(SlickExapleAppLoader.scala:42)
    at com.slick.init.impl.SlickExapleAppLoader$$anon$2.<init>(SlickExapleAppLoader.scala:17)
    at com.slick.init.impl.SlickExapleAppLoader.loadDevMode(SlickExapleAppLoader.scala:17)
    at com.lightbend.lagom.scaladsl.server.LagomApplicationLoader.load(LagomApplicationLoader.scala:76)
    at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$5(LagomReloadableDevServerStart.scala:176)
    at play.utils.Threads$.withContextClassLoader(Threads.scala:21)
    at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$3(LagomReloadableDevServerStart.scala:173)
    at scala.Option.map(Option.scala:163)
    at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$2(LagomReloadableDevServerStart.scala:149)
    at scala.util.Success.flatMap(Try.scala:251)
    at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$1(LagomReloadableDevServerStart.scala:147)
    at scala.concurrent.Future$.$anonfun$apply$1(Future.scala:658)
    at scala.util.Success.$anonfun$map$1(Try.scala:255)
    at scala.util.Success.map(Try.scala:213)
    at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
    at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
    at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
    at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
    at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
    at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
    at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
    at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
    at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
Caused by: java.lang.NullPointerException
    at com.example.db.models.LoginTable.<init>(LoginTable.scala:29)
    at com.example.db.models.LoginTable$.<init>(LoginTable.scala:293)
    at com.example.db.models.LoginTable$.<clinit>(LoginTable.scala)
    ... 24 more