Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adjust bin values for text features in RFF #99

Merged
merged 27 commits into from
Sep 4, 2018
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
da5ba89
adjust bin value based on number of token
sxd929 Aug 28, 2018
1d90e8f
cleanup
sxd929 Aug 28, 2018
3b6c45f
address comments
sxd929 Aug 28, 2018
eea1cf2
merge
sxd929 Aug 28, 2018
b958887
add sum and count to summary
sxd929 Aug 29, 2018
a54131b
change formula
sxd929 Aug 29, 2018
dae829d
add todo
sxd929 Aug 29, 2018
d6ad74f
Merge branch 'master' into xs/adjustBinValueForText
tovbinm Aug 30, 2018
d9dee94
use bins as default
sxd929 Aug 30, 2018
1bb28c6
Merge branch 'xs/adjustBinValueForText' of https://github.com/salesfo…
sxd929 Aug 30, 2018
441ab06
Merge branch 'master' into xs/adjustBinValueForText
sxd929 Aug 30, 2018
546fcfc
Merge branch 'master' into xs/adjustBinValueForText
tovbinm Aug 30, 2018
13cb288
Merge branch 'master' into xs/adjustBinValueForText
tovbinm Aug 30, 2018
c47e9bc
address comments
sxd929 Aug 31, 2018
e8d616b
cleanup
sxd929 Aug 31, 2018
e133bb4
Merge branch 'xs/adjustBinValueForText' of https://github.com/salesfo…
sxd929 Aug 31, 2018
20a9c40
cleanup
sxd929 Aug 31, 2018
e39dad6
Merge branch 'master' into xs/adjustBinValueForText
sxd929 Aug 31, 2018
1c6d359
Merge branch 'master' into xs/adjustBinValueForText
tovbinm Aug 31, 2018
5e2857c
Added textBinsFormula to RFF
tovbinm Aug 31, 2018
a2ffd3a
make scalastyle happy
tovbinm Aug 31, 2018
3c35a13
Merge branch 'master' into xs/adjustBinValueForText
tovbinm Aug 31, 2018
2f4f77a
Merge branch 'master' into xs/adjustBinValueForText
tovbinm Sep 1, 2018
6187e55
Merge branch 'master' into xs/adjustBinValueForText
tovbinm Sep 2, 2018
7ff99fe
Merge branch 'master' into xs/adjustBinValueForText
tovbinm Sep 3, 2018
bdc0608
update docs
tovbinm Sep 4, 2018
dfc1a40
Added docs
tovbinm Sep 4, 2018
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
adjust bin value based on number of token
  • Loading branch information
sxd929 committed Aug 28, 2018
commit da5ba89654fb25cd9e37c5e363cd964e6e08983d
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
package com.salesforce.op.filters

import com.salesforce.op.features.TransientFeature
import com.salesforce.op.stages.impl.feature.{Inclusion, NumericBucketizer}
import com.salesforce.op.stages.impl.feature.{HashAlgorithm, Inclusion, NumericBucketizer}
import com.twitter.algebird.Semigroup
import com.twitter.algebird.Monoid._
import com.twitter.algebird.Operators._
Expand Down Expand Up @@ -142,6 +142,7 @@ case class FeatureDistribution
private[op] object FeatureDistribution {

val MaxBins = 100000
val AvgBinValue = 5000

implicit val semigroup: Semigroup[FeatureDistribution] = new Semigroup[FeatureDistribution] {
override def plus(l: FeatureDistribution, r: FeatureDistribution) = l.reduce(r)
Expand All @@ -154,18 +155,16 @@ private[op] object FeatureDistribution {
* @param summary feature summary
* @param value optional processed sequence
* @param bins number of histogram bins
* @param hasher hashing method to use for text and categorical features
* @return feature distribution given the provided information
*/
def apply(
featureKey: FeatureKey,
summary: Summary,
value: Option[ProcessedSeq],
bins: Int,
hasher: HashingTF
bins: Int
): FeatureDistribution = {
val (nullCount, (summaryInfo, distribution)): (Int, (Array[Double], Array[Double])) =
value.map(seq => 0 -> histValues(seq, summary, bins, hasher))
value.map(seq => 0 -> histValues(seq, summary, bins))
.getOrElse(1 -> (Array(summary.min, summary.max) -> Array.fill(bins)(0.0)))

FeatureDistribution(
Expand All @@ -182,18 +181,25 @@ private[op] object FeatureDistribution {
* @param values values to bin
* @param sum summary info for feature (max and min)
* @param bins number of bins to produce
* @param hasher hasing function to use for text
* @return the bin information and the binned counts
*/
// TODO avoid wrapping and unwrapping??
private def histValues(
values: ProcessedSeq,
sum: Summary,
bins: Int,
hasher: HashingTF
bins: Int
): (Array[Double], Array[Double]) = {
values match {
case Left(seq) => Array(sum.min, sum.max) -> hasher.transform(seq).toArray // TODO use summary info to pick hashes
case Left(seq) => {
val minBins = bins
val maxBins = MaxBins
val numBins = math.min(math.max(bins, sum.max/AvgBinValue), maxBins).toInt
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

sum.max / AvgBinValue

Copy link
Collaborator

@tovbinm tovbinm Aug 28, 2018

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

instead of .toInt perhaps specify the explicit rounding instead.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

fixed! thanks!!


val hasher: HashingTF = new HashingTF(numFeatures = numBins)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

do we have to create hasher every time or perhaps we can create it once?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

the hashing dimension can be different for different features, the minimum number would be bins
but we can have a shared one with numFeatures = bins, and use that for every case if there are not too many tokens; OR we can create a couple of shared hashers with different scales, and choose one based on the scale of token numbers
What do you think? I was assuming creating a hasher for every feature would not be very resource-consuming

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

let's see if we can reuse the hashing function without creating a HashingTF every time.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

and I think we can. See HashingTF.transform and HashingTF object

.setBinary(false)
.setHashAlgorithm(HashAlgorithm.MurMur3.toString.toLowerCase)
Array(sum.min, sum.max) -> hasher.transform(seq).toArray
}
case Right(seq) => // TODO use kernel fit instead of histogram
if (sum == Summary.empty) {
Array(sum.min, sum.max) -> seq.toArray // the seq will always be empty in this case
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -88,29 +88,26 @@ private[filters] case class PreparedFeatures(
def getFeatureDistributions(
responseSummaries: Array[(FeatureKey, Summary)],
predictorSummaries: Array[(FeatureKey, Summary)],
bins: Int,
hasher: HashingTF
bins: Int
): (Array[FeatureDistribution], Array[FeatureDistribution]) = {
val responseFeatureDistributions: Array[FeatureDistribution] =
getFeatureDistributions(responses, responseSummaries, bins, hasher)
getFeatureDistributions(responses, responseSummaries, bins)
val predictorFeatureDistributions: Array[FeatureDistribution] =
getFeatureDistributions(predictors, predictorSummaries, bins, hasher)
getFeatureDistributions(predictors, predictorSummaries, bins)

responseFeatureDistributions -> predictorFeatureDistributions
}

private def getFeatureDistributions(
features: Map[FeatureKey, ProcessedSeq],
summaries: Array[(FeatureKey, Summary)],
bins: Int,
hasher: HashingTF
bins: Int
): Array[FeatureDistribution] = summaries.map { case (featureKey, summary) =>
FeatureDistribution(
featureKey = featureKey,
summary = summary,
value = features.get(featureKey),
bins = bins,
hasher = hasher)
bins = bins)
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -95,10 +95,6 @@ class RawFeatureFilter[T]

@transient protected lazy val log = LoggerFactory.getLogger(this.getClass)

private val hasher: HashingTF = new HashingTF(numFeatures = bins)
.setBinary(false)
.setHashAlgorithm(HashAlgorithm.MurMur3.toString.toLowerCase)


/**
* Get binned counts of the feature distribution and empty count for each raw feature
Expand Down Expand Up @@ -138,8 +134,7 @@ class RawFeatureFilter[T]
.map(_.getFeatureDistributions(
responseSummaries = responseSummariesArr,
predictorSummaries = predictorSummariesArr,
bins = bins,
hasher = hasher))
bins = bins))
.reduce(_ + _) // NOTE: resolved semigroup is IndexedSeqSemigroup
val correlationInfo: Map[FeatureKey, Map[FeatureKey, Double]] =
allFeatureInfo.map(_.correlationInfo).getOrElse {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,11 +30,10 @@

package com.salesforce.op.filters

import com.salesforce.op.OpParams
import com.salesforce.op.features.{OPFeature, TransientFeature}
import com.salesforce.op.features.TransientFeature
import com.salesforce.op.stages.impl.feature.HashAlgorithm
import com.salesforce.op.test.PassengerSparkFixtureTest
import com.salesforce.op.utils.spark.RichDataset._
import com.salesforce.op.testkit.RandomText
import org.apache.spark.mllib.feature.HashingTF
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
Expand All @@ -52,16 +51,13 @@ class FeatureDistributionTest extends FlatSpec with PassengerSparkFixtureTest wi
val summary =
Array(Summary(0.0, 1.0), Summary(-1.6, 10.6), Summary(0.0, 3.0), Summary(0.0, 0.0), Summary(1.0, 5.0))
val bins = 10
val hasher: HashingTF = new HashingTF(numFeatures = bins)
.setBinary(false)
.setHashAlgorithm(HashAlgorithm.MurMur3.toString.toLowerCase)

val featureKeys: Array[FeatureKey] = features.map(f => (f.name, None))
val processedSeqs: Array[Option[ProcessedSeq]] = values.map { case (isEmpty, processed) =>
if (isEmpty) None else Option(processed)
}
val distribs = featureKeys.zip(summary).zip(processedSeqs).map { case ((key, summ), seq) =>
FeatureDistribution(key, summ, seq, bins, hasher)
FeatureDistribution(key, summ, seq, bins)
}
distribs.foreach{ d =>
d.key shouldBe None
Expand All @@ -78,6 +74,29 @@ class FeatureDistributionTest extends FlatSpec with PassengerSparkFixtureTest wi
distribs(4).summaryInfo.length shouldBe bins
}

// Verifies that text (Left) features get an adaptive number of bins:
// per the diff above, numBins = min(max(bins, sum.max / AvgBinValue), MaxBins)
// with AvgBinValue = 5000, so token volume drives the hashing dimension.
it should "be correctly created for text features" in {
val features = Array(description, gender)
// Two text features with 10K and 1M random tokens respectively; the Boolean
// flag marks an empty feature (both are non-empty here).
val values: Array[(Boolean, ProcessedSeq)] = Array(
(false, Left(RandomText.strings(1, 10).take(10000).toSeq.map(_.value.get))),
(false, Left(RandomText.strings(1, 10).take(1000000).toSeq.map(_.value.get)))
)
// For text features the Summary carries the token count as min/max —
// presumably sum.max is the token count; confirm against Summary's contract.
val summary = Array(Summary(10000.0, 10000.0), Summary(1000000, 1000000))
val bins = 100
val featureKeys: Array[FeatureKey] = features.map(f => (f.name, None))
val processedSeqs: Array[Option[ProcessedSeq]] = values.map { case (isEmpty, processed) =>
if (isEmpty) None else Option(processed)
}
val distribs = featureKeys.zip(summary).zip(processedSeqs).map { case ((key, summ), seq) =>
FeatureDistribution(key, summ, seq, bins)
}

// 10000 / 5000 = 2 < bins, so the default bins (100) wins; all tokens binned.
distribs(0).distribution.length shouldBe 100
distribs(0).distribution.sum shouldBe 10000

// 1000000 / 5000 = 200 > bins, so the adaptive bin count (200) is used.
distribs(1).distribution.length shouldBe 200
distribs(1).distribution.sum shouldBe 1000000
}

it should "be correctly created for map features" in {
val features = Array(stringMap, numericMap, booleanMap).map(TransientFeature.apply)
val values: Array[Map[String, ProcessedSeq]] = Array(
Expand All @@ -95,7 +114,7 @@ class FeatureDistributionTest extends FlatSpec with PassengerSparkFixtureTest wi
val distribs = features.map(_.name).zip(summary).zip(values).flatMap { case ((name, summaryMaps), valueMaps) =>
summaryMaps.map { case (key, summary) =>
val featureKey = (name, Option(key))
FeatureDistribution(featureKey, summary, valueMaps.get(key), bins, hasher)
FeatureDistribution(featureKey, summary, valueMaps.get(key), bins)
}
}

Expand Down