from pyspark.sql import Row
from pyspark.ml.linalg import Vectors
from pyspark.ml.classification import LogisticRegression, LogisticRegressionModel
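
# The snippet below assumes an active SparkContext `sc` and a writable
# directory `temp_path`, as provided by the PySpark doctest fixtures.
# A minimal local setup for running it standalone might look like this
# (the master and app name are arbitrary choices):
import tempfile

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").appName("lr-example").getOrCreate()
sc = spark.sparkContext
temp_path = tempfile.mkdtemp()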
# Binomial training data: two classes, one feature, per-row weights.
bdf = sc.parallelize([
    Row(label=1.0, weight=2.0, features=Vectors.dense(1.0)),
    Row(label=0.0, weight=2.0, features=Vectors.sparse(1, [], []))]).toDF()
# Fit a binomial logistic regression, weighting each row by the `weight` column.
blor = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight")
blorModel = blor.fit(bdf)
print(blorModel.coefficients)
print(blorModel.intercept)
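
# Assuming Spark >= 2.0, the fitted binomial model also exposes a training
# summary; for example, the area under the ROC curve on the training data:
print(blorModel.summary.areaUnderROC)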
# Multinomial training data: three classes (0.0, 1.0, 2.0).
mdf = sc.parallelize([
    Row(label=1.0, weight=2.0, features=Vectors.dense(1.0)),
    Row(label=0.0, weight=2.0, features=Vectors.sparse(1, [], [])),
    Row(label=2.0, weight=2.0, features=Vectors.dense(3.0))]).toDF()
# Fit a multinomial model over the three classes.
mlor = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight",
                          family="multinomial")
mlorModel = mlor.fit(mdf)
print(mlorModel.coefficientMatrix)
print(mlorModel.interceptVector)
# Score an unseen point with the binomial model.
test0 = sc.parallelize([Row(features=Vectors.dense(-1.0))]).toDF()
result = blorModel.transform(test0).head()
print(result.prediction)
print(result.probability)
print(result.rawPrediction)
test1 = sc.parallelize([Row(features=Vectors.sparse(1, [0], [1.0]))]).toDF()
print(blorModel.transform(test1).head().prediction)  # 1.0
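
# The multinomial model scores data the same way; its probability column
# holds one entry per class (a sketch reusing `test0` from above):
print(mlorModel.transform(test0).head().probability)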
# setParams accepts keyword arguments only; a positional call such as
# blor.setParams("vector") raises TypeError.
# Persist the estimator and reload it; the params round-trip intact.
lr_path = temp_path + "/lr"
blor.save(lr_path)
lr2 = LogisticRegression.load(lr_path)
print(lr2.getMaxIter())  # 5
# Persist the fitted model and verify the reloaded copy matches.
model_path = temp_path + "/lr_model"
blorModel.save(model_path)
model2 = LogisticRegressionModel.load(model_path)
assert blorModel.coefficients[0] == model2.coefficients[0]
assert blorModel.intercept == model2.intercept
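
# Clean-up for the standalone setup sketched above (not needed when `sc`
# and `temp_path` come from an existing session):
import shutil

shutil.rmtree(temp_path, ignore_errors=True)
spark.stop()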